In [1]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
In [2]:
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST with the labels one-hot encoded
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
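# Note (sketch): tensorflow.examples.tutorials is deprecated in later TF
# releases; the same images can be pulled through tf.keras instead, though the
# arrays come back as raw uint8 and would still need flattening and one-hot
# encoding before they match the graph built below.
(x_raw, y_raw), _ = tf.keras.datasets.mnist.load_data()
print(x_raw.shape, y_raw.shape)  # (60000, 28, 28) (60000,)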
In [3]:
print(mnist.train.images.shape)  # (55000, 784): flattened 28x28 images
In [4]:
# Show the first training image and decode its one-hot label
plt.imshow(mnist.train.images[0].reshape(28, 28))
for i in range(10):
    if mnist.train.labels[0][i] == 1:
        print('LABEL: {}'.format(i))
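# One-line alternative (sketch): np.argmax decodes the one-hot label directly,
# without looping over the classes.
print('LABEL: {}'.format(np.argmax(mnist.train.labels[0])))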
In [7]:
input_nodes = 28 * 28    # flattened 28x28 image
output_nodes = 10        # digit classes 0-9
learning_rate = 0.001
# Creating the graph
tf.reset_default_graph()
x = tf.placeholder(dtype=tf.float32, shape=[None, input_nodes])
y = tf.placeholder(dtype=tf.float32, shape=[None, output_nodes])
W = tf.get_variable(name='W',
                    dtype=tf.float32,
                    shape=[input_nodes, output_nodes],
                    initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
b = tf.get_variable(name='b',
                    dtype=tf.float32,
                    shape=[output_nodes],
                    initializer=tf.constant_initializer(0.0))
h = tf.nn.softmax(tf.matmul(x, W) + b)
# Cross entropy loss, averaged over the batch
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(h), axis=1))
optimiser = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
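# Alternative loss (sketch, not wired into the optimiser above): computing the
# cross entropy from the raw logits avoids log(0) when the softmax saturates.
# Assumes a TF 1.x build that provides softmax_cross_entropy_with_logits_v2.
logits = tf.matmul(x, W) + b
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))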
In [36]:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Train
batch_size = 100
epochs = 10
epoch_size = 1000    # training iterations per epoch
n_iter = epochs * epoch_size
for step in range(n_iter + 1):
    xs, ys = mnist.train.next_batch(batch_size)
    sess.run(optimiser, feed_dict={x: xs, y: ys})
    if step % epoch_size == 0:
        print("Epoch: {} Cross Entropy: {:.2f}".format(
            step // epoch_size,
            cross_entropy.eval(feed_dict={x: xs, y: ys}, session=sess)))
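# Optional (sketch): persist the trained weights so the session can be restored
# later; the checkpoint path is only an illustrative choice.
saver = tf.train.Saver()
saver.save(sess, './mnist_softmax.ckpt')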
In [33]:
# Accuracy on the training set
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(h, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_accur = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels})
print('Train Accuracy: {:4.2f}%'.format(100 * train_accur))
In [34]:
v_accur = sess.run(accuracy, feed_dict={x: mnist.validation.images, y: mnist.validation.labels})
print('Validation Accuracy: {:4.2f}%'.format(100 * v_accur))
In [35]:
t_accur = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
print('Test Accuracy: {:4.2f}%'.format(100 * t_accur))
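# Quick sanity check (sketch): compare predicted vs. true labels for a few test
# images by running the softmax output and taking the argmax of each row.
preds = sess.run(h, feed_dict={x: mnist.test.images[:5]})
print('Predicted:', np.argmax(preds, axis=1))
print('True:     ', np.argmax(mnist.test.labels[:5], axis=1))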